Reference: original article link lost during export from the notebook.
from PIL import Image
from IPython.display import display
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
import pylab, random, os, math, sklearn
import numpy as np
import tensorflow as tf
# To cancel scientific notation
np.set_printoptions(suppress=True)
# Render figures inline in the notebook at retina resolution.
%matplotlib inline
# %matplotlib tk
%config InlineBackend.figure_format = 'retina'
# To set plotting style
# NOTE(review): each style.use() overrides the overlapping settings of the
# previous one, so "seaborn-whitegrid" wins wherever they conflict. That
# style name was removed in matplotlib >= 3.6 ("seaborn-v0_8-whitegrid") —
# confirm the pinned matplotlib version.
plt.style.use("classic")
plt.style.use("bmh")
plt.style.use("seaborn-whitegrid")
# To hide warnings
import warnings
warnings.filterwarnings("ignore")
Feedforward Neural Networks, also known as Deep Feedforward Networks or Multi-Layer Perceptrons, form the basis of many important neural networks in use today, such as Convolutional Neural Networks (used extensively in computer-vision applications), Recurrent Neural Networks (widely used in natural-language understanding and sequence learning), and so on.

No feedback connections or loops in the network.
It has an input layer, an output layer, and a hidden layer.
In general, there can be multiple hidden layers.
Each node in the layer is a Neuron, which can be thought of as the basic processing unit of a Neural Network.
It works in two steps – It calculates the weighted sum of its inputs and then applies an activation function to normalize the sum. The activation functions can be linear or nonlinear. Also, there are weights associated with each input of a neuron. These are the parameters which the network has to learn during the training phase.

The neuron learns Linear or Non-linear decision boundaries based on the activation function.
Sigmoid It maps the input ( x axis ) to values between 0 and 1.
Tanh: It is similar to the sigmoid function but maps the input to values between -1 and 1.
Rectified Linear Unit (ReLU) It allows only positive values to pass through it. The negative values are mapped to zero.
The first layer of a neural network is used to provide the input data or features to the network.
The output layer which gives out the predictions. The activation function to be used in this layer is different for different problems.
A feedforward network applies a series of functions to the input. By having multiple hidden layers, we can compute complex functions by cascading simpler functions.
The choice of hidden units is a very active research area in Machine Learning. The type of hidden layer distinguishes the different types of Neural Networks like CNNs, RNNs etc. The number of hidden layers is termed as the depth of the neural network.
The training samples are passed through the network and the output obtained from the network is compared with the actual output. This error is used to change the weights of the neurons such that the error decreases gradually. This is done using the Backpropagation algorithm, also called backprop. Iteratively passing batches of data through the network and updating the weights, so that the error is decreased, is known as Stochastic Gradient Descent (SGD). The amount by which the weights are changed is determined by a parameter called Learning rate. The details of SGD and backprop will be covered in a separate post.
# Load the MNIST handwritten-digit dataset (28x28 grayscale, labels 0-9).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Pixel row 10 of the first training image — raw uint8 values 0-255.
x_train[0][10]
plt.imshow(x_train[0], cmap="gray")
plt.show()
# Ground-truth label of the first training image.
y_train[0]
x_train.shape
x_test.shape
# Scale each image so its rows have unit L2 norm (keras normalize default).
x_train=tf.keras.utils.normalize(x_train,axis=1)
x_test=tf.keras.utils.normalize(x_test,axis=1)
plt.imshow(x_train[0], cmap="gray")
plt.show()
# Same pixel row after normalization — now small floats.
x_train[0][10]
Input layer 784 (28×28 pixels), 2 hidden layers of 128 neurons each, output layer 10.
# MNIST classifier: Flatten -> two Dense(128, relu) hidden layers
# -> Dense(10, softmax) output, one unit per digit class.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128,activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128,activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10,activation=tf.nn.softmax))
# sparse_categorical_crossentropy accepts integer labels (0-9) directly,
# so no one-hot encoding of y_train is needed.
model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
model.fit(x_train,y_train,epochs=5)
%%capture
val_loss, val_acc = model.evaluate(x_test,y_test)
val_loss
val_acc
predictions = model.predict([x_test[:11]])
x = predictions > 0.5
[np.array([0,1,2,3,4,5,6,7,8,9])[x[i]][0] for i in range(10)]
# Show the first ten test digits side by side, for visual comparison
# against the predicted labels printed above.
plt.figure(figsize=(30, 3))
for idx, img in enumerate(x_test[:10]):
    plt.subplot(1, 10, idx + 1)
    plt.imshow(img, cmap="gray")
plt.show()
y_test[:10]
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
def load_crack_image(cracked_path, uncracked_path, color_mode="grayscale"):
    """Load the cracked and uncracked image folders into two numpy arrays.

    Parameters
    ----------
    cracked_path, uncracked_path : str
        Directories holding the cracked / uncracked image files.
    color_mode : str
        Forwarded to keras ``load_img``; "grayscale" yields one channel.

    Returns
    -------
    (cracked, uncracked) : tuple of np.ndarray
        Arrays of shape (n_images, height, width); the trailing singleton
        channel axis produced by ``img_to_array`` is reshaped away.
    """
    def _load_dir(path):
        # os.listdir order is arbitrary and may contain non-image entries
        # (e.g. .DS_Store on the macOS paths used below), which would crash
        # load_img — sort for determinism and skip hidden files.
        names = sorted(f for f in os.listdir(path) if not f.startswith("."))
        imgs = np.array([img_to_array(load_img(os.path.join(path, f),
                                               color_mode=color_mode))
                         for f in names])
        # (n, h, w, 1) -> (n, h, w)
        return imgs.reshape(imgs.shape[0], imgs.shape[1], imgs.shape[2])

    return _load_dir(cracked_path), _load_dir(uncracked_path)
# Raw (unprocessed) Kaggle crack-image dataset.
cracked_path = "/Users/ray/Desktop/crack-images/kaggle-data/train/cracked/"
uncracked_path = "/Users/ray/Desktop/crack-images/kaggle-data/train/uncracked/"
cracked, uncracked = load_crack_image(cracked_path, uncracked_path)
# Show one sample of each class side by side.
plt.subplot(121)
plt.imshow(cracked[0], cmap="gray")
plt.subplot(122)
plt.imshow(uncracked[0], cmap="gray")
plt.show()
def crack_split(cracked, uncracked):
    """Label, merge, split and normalize the crack image arrays.

    Cracked images get label 1 and uncracked label 0; the combined set is
    split 80/20 with a fixed seed (random_state=100) and both splits are
    normalized along axis 1, matching the MNIST preprocessing above.

    Returns x_train, x_test, y_train, y_test.
    """
    labels = np.concatenate([np.ones(cracked.shape[0]),
                             np.zeros(uncracked.shape[0])])
    images = np.concatenate([cracked, uncracked])
    x_tr, x_te, y_tr, y_te = train_test_split(
        images, labels, test_size=0.2, random_state=100)
    return (tf.keras.utils.normalize(x_tr, axis=1),
            tf.keras.utils.normalize(x_te, axis=1),
            y_tr, y_te)
x_train, x_test, y_train, y_test = crack_split(cracked, uncracked)
# Preview a 5x5 grid of training images titled with their labels
# (1 = cracked, 0 = uncracked).
plt.figure(dpi=500)
for idx in range(25):
    plt.subplot(5, 5, idx + 1)
    plt.imshow(x_train[idx], cmap="gray")
    plt.title(y_train[idx], fontsize=5)
    plt.axis('off')
plt.show()
y_test[:100]
def histeq(im, nbr_bins=256):
    """Histogram-equalize a grayscale image.

    Parameters
    ----------
    im : np.ndarray
        Grayscale image (any shape); pixels are binned over their own range.
    nbr_bins : int
        Number of histogram bins.

    Returns
    -------
    np.ndarray
        Image of the same shape with intensities remapped onto [0, 255]
        through the cumulative distribution function.
    """
    # `normed=True` was removed from np.histogram in NumPy 1.24;
    # `density=True` is the supported spelling and gives the identical
    # result here because the cdf is renormalized by cdf[-1] below.
    imhist, bins = np.histogram(im.flatten(), nbr_bins, density=True)
    cdf = imhist.cumsum()       # cumulative distribution function
    cdf = 255 * cdf / cdf[-1]   # scale cdf onto [0, 255]
    # Linear interpolation of the cdf assigns each pixel its new value.
    im2 = np.interp(im.flatten(), bins[:-1], cdf)
    return im2.reshape(im.shape)
# Histogram-equalize every image in place (coarse 5-bin cdf),
# then re-normalize both splits as before.
for idx, img in enumerate(x_train):
    x_train[idx] = histeq(img, 5)
for idx, img in enumerate(x_test):
    x_test[idx] = histeq(img, 5)
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
# Re-inspect the same 25 training images after histogram equalization.
plt.figure(dpi=500)
for k in range(25):
    plt.subplot(5, 5, k + 1)
    plt.imshow(x_train[k], cmap="gray")
    plt.title(y_train[k], fontsize=5)
    plt.axis('off')
plt.show()
def FNN_Model():
    """Build and compile the binary crack-classification network.

    Architecture: Flatten -> Dense(128, relu) -> Dense(64, relu)
    -> Dense(1, sigmoid), compiled with Adam on binary cross-entropy,
    tracking binary accuracy, precision, and the true-positive count.
    """
    net = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation=tf.nn.relu),
        tf.keras.layers.Dense(64, activation=tf.nn.relu),
        tf.keras.layers.Dense(1, activation=tf.nn.sigmoid),
    ])
    net.compile(
        optimizer='adam',
        loss=tf.keras.losses.BinaryCrossentropy(),
        metrics=[tf.keras.metrics.BinaryAccuracy(),
                 tf.keras.metrics.Precision(),
                 tf.keras.metrics.TruePositives()],
    )
    return net
# Train the baseline FNN on the raw (unprocessed) crack images.
model = FNN_Model()
model.fit(x_train,y_train,epochs=5)
model.summary()
%%capture
val_loss, val_acc, val_pre, val_TP = model.evaluate(x_test,y_test)
# Collect metrics for each preprocessing variant keyed by name.
# TPR = true positives / number of actual positives in the test split.
result = {}
result["original"] = [val_loss, val_acc, val_pre, val_TP/(y_test == 1).sum()]
val_loss, val_acc, val_pre, val_TP/(y_test == 1).sum()
# Raw sigmoid outputs in [0, 1] for the first ten test images.
predictions = model.predict([x_test[:10]])
predictions
y_test[:10]
# Show the ten test images whose predictions were printed above,
# titled with their ground-truth labels.
plt.figure(dpi=500)
for k in range(10):
    plt.subplot(2, 5, k + 1)
    plt.imshow(x_test[k], cmap="gray")
    plt.title(y_test[k], fontsize=5)
    plt.axis('off')
plt.show()
# Repeat the experiment on the Canny edge-detected version of the dataset.
cracked_path = "/Users/ray/Desktop/crack-images/Canny/train/cracked/"
uncracked_path = "/Users/ray/Desktop/crack-images/Canny/train/uncracked/"
cracked, uncracked = load_crack_image(cracked_path, uncracked_path)
plt.subplot(121)
plt.imshow(cracked[0], cmap="gray")
plt.subplot(122)
plt.imshow(uncracked[0], cmap="gray")
plt.show()
x_train, x_test, y_train, y_test = crack_split(cracked, uncracked)
# Fresh model so this run is not contaminated by earlier weights.
model = FNN_Model()
model.fit(x_train,y_train,epochs=5)
%%capture
val_loss, val_acc, val_pre, val_TP = model.evaluate(x_test,y_test)
result["canny"] = [val_loss, val_acc, val_pre, val_TP/(y_test == 1).sum()]
val_loss, val_acc, val_pre, val_TP/(y_test == 1).sum()
# Repeat the experiment on the thresholded ("TH") version of the dataset.
cracked_path = "/Users/ray/Desktop/crack-images/TH/train/cracked/"
uncracked_path = "/Users/ray/Desktop/crack-images/TH/train/uncracked/"
cracked, uncracked = load_crack_image(cracked_path, uncracked_path)
plt.subplot(121)
plt.imshow(cracked[0], cmap="gray")
plt.subplot(122)
plt.imshow(uncracked[0], cmap="gray")
plt.show()
x_train, x_test, y_train, y_test = crack_split(cracked, uncracked)
# Fresh model so this run is not contaminated by earlier weights.
model = FNN_Model()
model.fit(x_train,y_train,epochs=5)
%%capture
val_loss, val_acc, val_pre, val_TP = model.evaluate(x_test,y_test)
result["thresholding"] = [val_loss, val_acc, val_pre, val_TP/(y_test == 1).sum()]
val_loss, val_acc, val_pre, val_TP/(y_test == 1).sum()
# Repeat the experiment on the XCSLBP texture-feature version of the
# dataset (presumably extended center-symmetric LBP — confirm).
cracked_path = "/Users/ray/Desktop/crack-images/XCSLBP/train/cracked/"
uncracked_path = "/Users/ray/Desktop/crack-images/XCSLBP/train/uncracked/"
cracked, uncracked = load_crack_image(cracked_path, uncracked_path)
plt.subplot(121)
plt.imshow(cracked[0], cmap="gray")
plt.subplot(122)
plt.imshow(uncracked[0], cmap="gray")
plt.show()
x_train, x_test, y_train, y_test = crack_split(cracked, uncracked)
# Fresh model so this run is not contaminated by earlier weights.
model = FNN_Model()
model.fit(x_train,y_train,epochs=5)
%%capture
val_loss, val_acc, val_pre, val_TP = model.evaluate(x_test,y_test)
result["XCSLBP"] = [val_loss, val_acc, val_pre, val_TP/(y_test == 1).sum()]
val_loss, val_acc, val_pre, val_TP/(y_test == 1).sum()
import pandas as pd
# Assemble the per-method metrics into a comparison table:
# one column per preprocessing variant, one row per metric.
result = pd.DataFrame(result)
result.index = ["loss", "Accuracy", "Precision", "TPR"]
result